Skip to content

Commit 401916c

Browse files
authored
[Lint] Standardize code style and logger usage. (#1493)
* [Lint] Standardize code style and logger usage: 1. Use `vlmeval.smp.get_logger` to set up a module-level logger. 2. Forbid `from xxx import *` to avoid confusing function sources. 3. Use strict lint settings (remove most error-ignoring rules).
1 parent f67e576 commit 401916c

595 files changed

Lines changed: 5198 additions & 4562 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.pre-commit-config.yaml

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,15 +15,19 @@ exclude: |
1515
)
1616
repos:
1717
- repo: https://github.com/PyCQA/flake8
18-
rev: 6.1.0
18+
rev: 7.1.2
1919
hooks:
2020
- id: flake8
2121
args:
2222
[
2323
"--max-line-length=120",
24-
"--ignore=F401,F403,F405,E402,E722,E741,W503,E231,E702",
24+
"--ignore=W503",
2525
]
2626
exclude: ^configs/
27+
- repo: https://github.com/PyCQA/isort
28+
rev: 6.0.1
29+
hooks:
30+
- id: isort
2731
- repo: https://github.com/google/yapf
2832
rev: v0.43.0
2933
hooks:

docs/en/conf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,9 @@
1010
# If extensions (or modules to document with autodoc) are in another directory,
1111
# add these directories to sys.path here. If the directory is relative to the
1212
# documentation root, use os.path.abspath to make it absolute, like shown here.
13-
#
14-
import os
13+
1514
import ast
15+
import os
1616
import subprocess
1717
import sys
1818

docs/zh-CN/conf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,9 @@
1010
# If extensions (or modules to document with autodoc) are in another directory,
1111
# add these directories to sys.path here. If the directory is relative to the
1212
# documentation root, use os.path.abspath to make it absolute, like shown here.
13-
#
14-
import os
13+
1514
import ast
15+
import os
1616
import subprocess
1717
import sys
1818

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ setuptools
5252
sty
5353
sympy
5454
tabulate
55+
termcolor
5556
tiktoken
5657
timeout-decorator
5758
timm

run.py

Lines changed: 26 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,15 @@
1+
import argparse
2+
import copy as cp
3+
import datetime
14
import json
25
import os
6+
import os.path as osp
37
import subprocess
48
from functools import partial
59

10+
import pandas as pd
11+
from tabulate import tabulate
12+
613

714
# GET the number of GPUs on the node without importing libs like torch
815
def get_gpu_list():
@@ -14,14 +21,14 @@ def get_gpu_list():
1421
ps = subprocess.Popen(('nvidia-smi', '--list-gpus'), stdout=subprocess.PIPE)
1522
output = subprocess.check_output(('wc', '-l'), stdin=ps.stdout)
1623
return list(range(int(output)))
17-
except:
24+
except Exception:
1825
return []
1926

2027

2128
RANK = int(os.environ.get('RANK', 0))
2229
WORLD_SIZE = int(os.environ.get('WORLD_SIZE', 1))
23-
LOCAL_WORLD_SIZE = int(os.environ.get("LOCAL_WORLD_SIZE",1))
24-
LOCAL_RANK = int(os.environ.get("LOCAL_RANK",1))
30+
LOCAL_WORLD_SIZE = int(os.environ.get("LOCAL_WORLD_SIZE", 1))
31+
LOCAL_RANK = int(os.environ.get("LOCAL_RANK", 1))
2532

2633
GPU_LIST = get_gpu_list()
2734
if LOCAL_WORLD_SIZE > 1 and len(GPU_LIST):
@@ -40,12 +47,13 @@ def get_gpu_list():
4047

4148

4249
from vlmeval.config import supported_VLM
43-
from vlmeval.dataset.video_dataset_config import supported_video_datasets
4450
from vlmeval.dataset import build_dataset
51+
from vlmeval.dataset.video_dataset_config import supported_video_datasets
4552
from vlmeval.inference import infer_data_job
46-
from vlmeval.inference_video import infer_data_job_video
4753
from vlmeval.inference_mt import infer_data_job_mt
48-
from vlmeval.smp import *
54+
from vlmeval.inference_video import infer_data_job_video
55+
from vlmeval.smp import (MMBenchOfficialServer, get_pred_file_format, githash, listinstr, load,
56+
load_env, ls, prepare_reuse_files, proxy_set, setup_logger, timestr)
4957
from vlmeval.utils.result_transfer import MMMU_result_transfer, MMTBench_result_transfer
5058

5159

@@ -74,8 +82,9 @@ def build_model_from_config(cfg, model_name, use_vllm=False):
7482

7583

7684
def build_dataset_from_config(cfg, dataset_name):
77-
import vlmeval.dataset
7885
import inspect
86+
87+
import vlmeval.dataset
7988
config = cp.deepcopy(cfg[dataset_name])
8089
if config == {}:
8190
return supported_video_datasets[dataset_name]()
@@ -201,7 +210,6 @@ def parse_args():
201210

202211

203212
def main():
204-
logger = get_logger('RUN')
205213
args = parse_args()
206214
use_config, cfg = False, None
207215
if args.config is not None:
@@ -212,15 +220,19 @@ def main():
212220
else:
213221
assert len(args.data), '--data should be a list of data files'
214222

223+
if 'MMEVAL_ROOT' in os.environ:
224+
args.work_dir = os.environ['MMEVAL_ROOT']
225+
226+
date, commit_id = timestr('day'), githash(digits=8)
227+
eval_id = f"T{date}_G{commit_id}"
228+
logger = setup_logger(log_file=os.path.join(args.work_dir, 'logs', f'{eval_id}_{timestr()}.log'))
229+
215230
if RANK == 0:
216231
if not args.reuse:
217232
logger.warning('--reuse is not set, will not reuse previous (before one day) temporary files')
218233
else:
219234
logger.warning('--reuse is set, will reuse the latest prediction & temporary pickle files')
220235

221-
if 'MMEVAL_ROOT' in os.environ:
222-
args.work_dir = os.environ['MMEVAL_ROOT']
223-
224236
if not use_config:
225237
for k, v in supported_VLM.items():
226238
if hasattr(v, 'keywords') and 'retry' in v.keywords and args.retry is not None:
@@ -232,8 +244,8 @@ def main():
232244

233245
# If FWD_API is set, will use class `GPT4V` for all API models in the config
234246
if os.environ.get('FWD_API', None) == '1':
235-
from vlmeval.config import api_models as supported_APIs
236247
from vlmeval.api import GPT4V
248+
from vlmeval.config import api_models as supported_APIs
237249
for m in args.model:
238250
if m in supported_APIs:
239251
kws = supported_VLM[m].keywords
@@ -248,6 +260,7 @@ def main():
248260
)
249261

250262
for _, model_name in enumerate(args.model):
263+
logger.info(f'=========== {model_name} ===========')
251264
model = None
252265
date, commit_id = timestr('day'), githash(digits=8)
253266
eval_id = f"T{date}_G{commit_id}"
@@ -267,6 +280,7 @@ def main():
267280
model = build_model_from_config(cfg['model'], model_name, args.use_vllm)
268281

269282
for _, dataset_name in enumerate(args.data):
283+
logger.info(f'----------- {dataset_name} -----------')
270284
if WORLD_SIZE > 1:
271285
dist.barrier()
272286

run_api.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,19 @@
1+
import argparse
12
import asyncio
3+
import datetime
24
import json
35
import os
4-
import argparse
5-
import datetime
66
from functools import partial
77
from pathlib import Path
88
from typing import List
99

10-
from vlmeval.config import supported_VLM
11-
from vlmeval.dataset import build_dataset
12-
from vlmeval.smp import *
1310
from vlmeval.api import LMDeployAPI
1411
from vlmeval.api.adapters import get_adapter_registry
15-
12+
from vlmeval.config import supported_VLM
13+
from vlmeval.dataset import build_dataset
1614
from vlmeval.inference_api import APIEvalPipeline, DatasetConfig
17-
15+
from vlmeval.smp import (get_pred_file_format, githash, listinstr, load_env, prepare_reuse_files,
16+
setup_logger, timestr)
1817

1918
group_dic = {
2019
'general-mini': ['MMMU_Pro_10c'],

scripts/apires_scan.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
1+
import os.path as osp
12
import sys
2-
from vlmeval import *
3+
34
from vlmeval.dataset import SUPPORTED_DATASETS
5+
from vlmeval.smp import listinstr, load, ls
6+
47
FAIL_MSG = 'Failed to obtain answer via API.'
58

69
root = sys.argv[1]

scripts/auto_run.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,10 @@
11
import argparse
2-
from vlmeval.smp import *
2+
import os
3+
import os.path as osp
4+
35
from vlmeval.config import supported_VLM
6+
from vlmeval.smp import listinstr
7+
48

59
def is_api(x):
610
return getattr(supported_VLM[x].func, 'is_api', False)

scripts/data_browser.py

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -4,20 +4,25 @@
44
# browse data in http://127.0.0.1:10064
55
"""
66

7-
import os
7+
import argparse
8+
import base64
9+
import copy
810
import io
911
import json
10-
import copy
12+
import os
13+
import os.path as osp
14+
import string
1115
import time
12-
import gradio as gr
13-
import base64
14-
from PIL import Image
1516
from io import BytesIO
16-
from argparse import Namespace
17-
# from llava import conversation as conversation_lib
1817
from typing import Sequence
19-
from vlmeval import *
18+
19+
import gradio as gr
20+
import pandas as pd
21+
from PIL import Image
22+
23+
from vlmeval.api import OpenAIWrapper
2024
from vlmeval.dataset import SUPPORTED_DATASETS, build_dataset
25+
from vlmeval.smp import LMUDataRoot, encode_image_file_to_base64, load
2126

2227
SYS = "You are a helpful assistant. Your job is to faithfully translate all provided text into Chinese faithfully. "
2328

scripts/mmb_eval_gradio.py

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,15 @@
1-
from vlmeval.smp import *
2-
from vlmeval.tools import EVAL
3-
from vlmeval.dataset import build_dataset
1+
import datetime
2+
import os
3+
import os.path as osp
4+
import shutil
5+
46
import gradio as gr
7+
import numpy as np
8+
import pandas as pd
9+
10+
from vlmeval.dataset import build_dataset
11+
from vlmeval.smp import LMUDataRoot, cn_string, dump, load, md5
12+
from vlmeval.tools import EVAL
513

614
HEADER = """
715
# Welcome to MMBench👏👏

0 commit comments

Comments (0)