A handy ReID training framework
Get the code
git clone https://github.com/KaiyangZhou/deep-person-reid.git
cd deep-person-reid/
Environment setup
Using conda
conda create --name torchreid python=3.7
conda activate torchreid
# install dependencies
# make sure `which python` and `which pip` point to the correct path
pip install -r requirements.txt
# install torch and torchvision (select the proper cuda version to suit your machine)
conda install pytorch torchvision cudatoolkit=9.0 -c pytorch
# install torchreid (don't need to re-build it if you modify the source code)
python setup.py develop
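To confirm the conda environment is usable, a quick sanity check (assuming the install steps above succeeded) is to import the package and list the models it can build:

# quick sanity check inside the torchreid conda env
import torch
import torchreid

print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("torchreid:", torchreid.__version__)
torchreid.models.show_avai_models()  # prints the model names accepted by build_model()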
Using Docker
Dockerfile
FROM pytorch/pytorch:0.4.1-cuda9-cudnn7-devel
RUN git clone https://github.com/KaiyangZhou/deep-person-reid.git
# `RUN cd` does not persist across layers; set the working directory instead
WORKDIR deep-person-reid
RUN pip install --upgrade pip
RUN apt-get update && apt-get install -y libgl1-mesa-glx libglib2.0-0
RUN pip install --upgrade torch
RUN pip install --upgrade tensorboard
# copy the local checkout (the build context) into the working directory
ADD ./ ./
RUN pip install -r requirements.txt
RUN python setup.py develop
For the RTX 3090 (Ampere needs CUDA 11.1+, hence the cu111 nightly wheels below)
FROM pytorch/pytorch:0.4.1-cuda9-cudnn7-devel
RUN pip install --upgrade pip
RUN apt-get update && apt-get install -y libgl1-mesa-glx libglib2.0-0
RUN pip install --upgrade torch
RUN pip install --upgrade tensorboard
# replace the base image's PyTorch with a CUDA 11.1 nightly build so the 3090 (sm_86) is supported
RUN pip uninstall -y torch torchvision
RUN pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu111/torch_nightly.html
ADD ./ ./
RUN pip install -r requirements.txt
RUN python setup.py develop
build
docker build -t torchreid .
run
docker run --gpus all -d \
-it \
-e DISPLAY \
-e QT_X11_NO_MITSHM=1 \
--shm-size="8g" \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v $HOME/.Xauthority:/root/.Xauthority \
--device=/dev/video0:/dev/video0 \
--name torchreid \
--restart=always \
torchreid bash
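The container is started detached with a bash shell, so you can attach to it with docker exec -it torchreid bash. As a quick check, run inside the container, that the GPU passed through by --gpus all is actually visible to PyTorch:

# run inside the container to confirm GPU passthrough
import torch

print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("device:", torch.cuda.get_device_name(0))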
Download the training data (if visualize_actmap fails to download it automatically)
mkdir datasets
cd datasets
kaggle datasets download pengcw1/market-1501
unzip market-1501.zip
rm market-1501.zip
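A rough way to confirm the extraction landed where torchreid's market1501 loader can find it is to look for the usual Market-1501 subfolders. The folder names below (Market-1501-v15.09.15 and a possible market1501/ nesting) are assumptions that may need adjusting to the actual zip layout:

# rough layout check; the top-level folder names are assumptions
import os

candidates = [
    "./datasets/market1501/Market-1501-v15.09.15",  # layout torchreid typically expects
    "./datasets/Market-1501-v15.09.15",             # possible layout straight from the zip
]
for root in candidates:
    subs = ["bounding_box_train", "bounding_box_test", "query"]
    found = [s for s in subs if os.path.isdir(os.path.join(root, s))]
    print(root, "->", found if found else "not found")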
Test the activation-map (heatmap) visualization
python tools/visualize_actmap.py --root ./datasets/ -d market1501 -m osnet_x1_0 --weights PATH_TO_PRETRAINED_WEIGHTS --save-dir log/visactmap_osnet_x1_0_market1501
Or use VS Code (launch.json)
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Visualize activation maps",
            "type": "python",
            "request": "launch",
            "program": "tools/visualize_actmap.py",
            "console": "integratedTerminal",
            "args": [
                "--root",
                "./datasets/",
                "-d",
                "market1501",
                "-m",
                "osnet_x1_0",
                "--weights",
                "PATH_TO_PRETRAINED_WEIGHTS",
                "--save-dir",
                "log/visactmap_osnet_x1_0_market1501"
            ]
        }
    ]
}
Training
import torchreid

# model and training batch size; the commented pair is a lighter alternative
trainmodel = "resnet50"
batch_size = 800
# trainmodel = "osnet_x0_25"
# batch_size = 32

# data manager: looks for Market-1501 under `root`
# (change to './datasets' if you downloaded the data manually above)
datamanager = torchreid.data.ImageDataManager(
    root='reid-data',
    sources='market1501',
    targets='market1501',
    height=256,
    width=128,
    batch_size_train=batch_size,
    batch_size_test=batch_size,
    transforms=['random_flip', 'random_crop']
)

# build the backbone with ImageNet-pretrained weights and a softmax classifier head
model = torchreid.models.build_model(
    name=trainmodel,
    num_classes=datamanager.num_train_pids,
    loss='softmax',
    pretrained=True
)
model = model.cuda()

optimizer = torchreid.optim.build_optimizer(
    model,
    optim='adam',
    lr=0.0003
)

# step-decay learning-rate schedule
scheduler = torchreid.optim.build_lr_scheduler(
    optimizer,
    lr_scheduler='single_step',
    stepsize=20
)

# softmax (cross-entropy) training engine with label smoothing
engine = torchreid.engine.ImageSoftmaxEngine(
    datamanager,
    model,
    optimizer=optimizer,
    scheduler=scheduler,
    label_smooth=True
)

engine.run(
    save_dir='log/' + trainmodel,
    max_epoch=60,
    eval_freq=10,
    print_freq=10,
    test_only=False
)
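To evaluate a saved checkpoint without retraining, the weights can be loaded back and the engine run in test-only mode. A minimal sketch that reuses the model and engine objects above; the checkpoint path is a placeholder for whatever engine.run actually wrote under log/resnet50:

from torchreid.utils import load_pretrained_weights

# placeholder path; adjust to the checkpoint file actually saved during training
weight_path = 'log/resnet50/model.pth.tar-60'
load_pretrained_weights(model, weight_path)

engine.run(
    save_dir='log/resnet50-eval',
    test_only=True
)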