Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev #5

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions evaluate.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@
# pose_number is how many number of keypoints we want to see in a person
# pose_threshold is the score we set to filter the keypoints whose score is small
# ground_truth='/export/home/cyh/mygithub/PoseGCN/data/annotations/val_2017/'
ground_truth='${PGPT_ROOT}/data/demodataset/annotations/'
predictions='${PGPT_ROOT}/results/demo'
results='test'
ground_truth='/PGPT/data/demodataset/annotations'
predictions='/PGPT/results/demo'
results='/PGPT/results/evaluate'
pose_number=0
pose_threshold=0.5

Expand Down
24 changes: 13 additions & 11 deletions inference/config.py → inference/config_old.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,33 @@

class Config():
    """Legacy path configuration for the PGPT inference pipeline.

    All paths are derived from a single project root. This class was renamed
    to config_old.py in this change set; the yacs-based lib/config is the
    replacement — TODO confirm no remaining importers.
    """
    # NOTE(review): the two assignments below look like diff residue
    # (old line kept next to its replacement); the later one wins.
    root = '${PGPT_ROOT}'
    root = '/PGPT'

    # save_dir is the location where we store the results
    save_dir = root + '/results/demo'

    # json_path_detection is the location where we store the detection results
    json_path_detection = root + '/results/demo_detection.json'

    # gt_json_path is the ground truth of the validation; all the ground truth is in one file
    gt_json_path = root + '/data/demo_val.json'

    # the data folder of the PoseTrack dataset
    data_folder = root + '/data/demodataset'

    # the path of the location where we store the video
    video_path = root
    video_path = save_dir  # NOTE(review): overrides the line above — the '= root' assignment is dead

    # the path of the track model
    track_model = root + '/models/tracker.pth'

    # the path of the pose estimation model
    pose_model = root + '/models/pose_gcn.pth.tar'

    # pose config file location
    pose_cfg = root + '/cfgs/pose_res152.yaml'

    # the path of the embedding model
    embedding_model = root + '/models/embedding_model.pth'

    def __init__(self):
        # Announce which config module is active (helps when both old and new configs exist).
        print('Using the config class at', __file__)
54 changes: 37 additions & 17 deletions inference/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,13 @@

from tqdm import tqdm
import random
from config import Config

#from config import Config
import sys
sys.path.append('../lib')
from config import cfg
from config import update_config

from track_and_detect_new import Track_And_Detect

'''
Expand Down Expand Up @@ -51,13 +57,21 @@
}
'''
match_list=[13,12,14,9,8,10,7,11,6,3,2,4,1,5,0]
config = Config()
#config = Config()
def parseArgs():
    """Parse command-line arguments for pose-estimation/tracking evaluation.

    Returns:
        argparse.Namespace with:
            cfg (str): path to the yacs experiment config file (required).
            det_thresh (float): detection score threshold (default 0.4).
            pose_thresh (float): pose keypoint score threshold (default 0).
            vis_flag (bool): whether to render visualizations (default False).
            opts (list): remaining CLI tokens, used to override config options.
    """
    parser = argparse.ArgumentParser(description="Evaluation of Pose Estimation and Tracking (PoseTrack)")
    parser.add_argument('--cfg', type=str, required=True)  # added by alnguyen
    parser.add_argument("-t", "--detection_thresh", dest='det_thresh', required=False, default=0.4, type=float)
    parser.add_argument("-p", "--pos_thresh", dest='pose_thresh', required=False, default=0, type=float)
    # NOTE(review): type=bool makes any non-empty string truthy ("-v False" -> True);
    # action='store_true' would be safer but changes the CLI shape — kept as-is.
    parser.add_argument("-v", "--vis_flag", dest='vis_flag', required=False, default=False, type=bool)
    parser.add_argument('opts',
                        help='Modify config options using the command-line',
                        default=None,
                        nargs=argparse.REMAINDER)  # added by alnguyen
    # Fix: removed the diff-residue early "return parser.parse_args()" that sat
    # before the 'opts' registration and made it unreachable.
    args = parser.parse_args()

    return args

class DateEncoder(json.JSONEncoder):
def default(self, obj):
Expand All @@ -67,18 +81,22 @@ def default(self, obj):
return obj.tolist()
return json.JSONEncoder.default(self, obj)

def track_test(args, gpu_id=0):
def track_test():

args = parseArgs()
pose_vis_thresh = args.pose_thresh
detection_score_thresh = args.det_thresh
vis_flag = args.vis_flag
json_path = config.json_path_detection

update_config(cfg, args)
gpu_id = cfg.GPU_ID
json_path = cfg.INPUT.JSON_DETECTION_PATH
# Change temporially
save_dir = config.save_dir
save_dir = cfg.OUTPUT.SAVE_DIR

gt_json_path = config.gt_json_path
data_folder = config.data_folder
video_path = config.video_path
gt_json_path = cfg.INPUT.GT_JSON_PATH
data_folder = cfg.INPUT.DATA_FOLDER
video_path = cfg.OUTPUT.VIDEO_PATH

print('----------------------------------------------------------------------------------')
print('Detection_score_thresh: {} Vis_flag: {}'.format(detection_score_thresh, vis_flag))
Expand All @@ -87,24 +105,27 @@ def track_test(args, gpu_id=0):
if not os.path.exists(save_dir):
os.makedirs(save_dir)

# Load the Detection Results
# Load the Detection Results (demo_detection.json)
with open(json_path,'r') as f:
bbox_dict = json.load(f)

# Create the Tracker
tracker = Track_And_Detect(gpu_id=gpu_id, track_model=config.track_model, pose_model=config.pose_model, embedding_model=config.embedding_model)
track_model=cfg.INPUT.TRACK_MODEL
pose_model=cfg.INPUT.POSE_MODEL
embedding_model=cfg.INPUT.EMBEDDING_MODEL
tracker = Track_And_Detect(gpu_id=gpu_id, track_model=track_model, pose_model=pose_model, embedding_model=embedding_model)

# Load the Ground Truth to get the right video keys
# Load the Ground Truth to get the right video keys (demo_val.json)
with open(gt_json_path,'r') as f:
gt_dict = json.load(f)


video_keys = gt_dict.keys()
pbar = tqdm(range(len(video_keys)))
for video_name in video_keys:
for video_name in video_keys: #in demo_val.json
pbar.update(1)
frame_dict = bbox_dict[video_name]
video_name = video_name.replace('.json','')
#video_name = video_name.replace('.json','')
video_json = {'annolist':[]}
save_path = os.path.join(save_dir, video_name+'.json')
idx =0
Expand All @@ -124,7 +145,7 @@ def track_test(args, gpu_id=0):
if not os.path.exists(video_path):
os.makedirs(video_path)
video_store_name = video_path + '/{}.mp4'
videoWriter = cv2.VideoWriter(video_store_name.format(video_name),fourcc,10,(im_W,im_H))
videoWriter = cv2.VideoWriter(video_store_name.format(video_name+'-pgpt'),fourcc,10,(im_W,im_H))
final_list = tracker.init_tracker(frame,det_list)
else:
track_list = tracker.multi_track(frame)
Expand Down Expand Up @@ -154,5 +175,4 @@ def track_test(args, gpu_id=0):


if __name__ == "__main__":
args = parseArgs()
track_test(args=args)
track_test()
2 changes: 1 addition & 1 deletion inference/pose_estimation_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@
'''
class PoseNet(object):
def __init__(self, gpu_id=0, model_path=None):
self.cfg_file='${PGPT_ROOT}/cfgs/pose_res152.yaml'
self.cfg_file='/PGPT/cfgs/pose_res152.yaml'
self.flag = 0

update_config(self.cfg_file)
Expand Down
50 changes: 38 additions & 12 deletions inference/skeleton_visulize.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,17 @@
import cv2
import numpy as np
from cv2_color import Color
import glob
#from config import Config
import sys
sys.path.append('../lib')
from config import cfg
from config import update_config

# The match list from the results to the test
match_list=[13,12,14,9,8,10,7,11,6,3,2,4,1,5,0]
color = Color(flag='bgr')


def draw_limb(image, kps, color):
def draw_line(head, tail):
if head == [] or tail == []:
Expand All @@ -33,7 +38,17 @@ def draw_line(head, tail):
for h, t in limbs:
draw_line(kps[h], kps[t])

def parseArgs():
    """Build and parse the command-line arguments for result visualization.

    Returns an argparse.Namespace with `cfg` (required path to the yacs
    config file) and `opts` (remaining tokens used as config overrides).
    """
    arg_parser = argparse.ArgumentParser(description="Visualizing the results")
    arg_parser.add_argument('--cfg', type=str, required=True)  # added by alnguyen
    arg_parser.add_argument(
        'opts',
        help='Modify config options using the command-line',
        default=None,
        nargs=argparse.REMAINDER)  # added by alnguyen
    return arg_parser.parse_args()

def demo(image_dir, result_dir, save_dir):
"""
Expand All @@ -42,12 +57,14 @@ def demo(image_dir, result_dir, save_dir):
save_dir: the loaction where we store the result videos
"""

json_files = os.listdir(result_dir)
#json_files = os.listdir(result_dir)
json_files = glob.glob(result_dir + "/*.json")
json_files = [json_file.split('/')[-1] for json_file in json_files]

pbar = tqdm(range(len(json_files)))
for json_name in json_files:

video_name = json_name.replace('.json','_new')
video_name = json_name.replace('.json','-pgpt')

video_folder = os.path.join(save_dir, video_name)

Expand All @@ -58,11 +75,13 @@ def demo(image_dir, result_dir, save_dir):
old_annolist = json.load(f)['annolist']
pbar.set_description('Visulizing video {}'.format(video_name))
color_list = color.get_random_color_list()
j = 0
for i,annotation in enumerate(old_annolist):
color_flag = 0
frame_name = annotation['image'][0]['name']
frame_store_path = video_folder + '/{}'.format(frame_name.split('/')[-1])
frame_path = os.path.join(image_dir,frame_name)
#annotation['image_id'] #10010010103=frame_id
frame_store_path = os.path.join(video_folder, frame_name.split('/')[-1]) #file
frame_path = os.path.join(image_dir,frame_name) #file for read
frame = cv2.imread(frame_path)
im_H, im_W, im_C = frame.shape
if i==0:
Expand All @@ -73,30 +92,37 @@ def demo(image_dir, result_dir, save_dir):
if len(anno['annopoints']) == 0:
continue
old_point_list = anno['annopoints'][0]['point']

xmin, xmax, ymin, ymax, track_id = anno['x1'][0], anno['x2'][0], anno['y1'][0], anno['y2'][0], anno['track_id'][0]
color_flag = int(track_id) % 16

kps = [[] for _ in range(15)]
for pose in old_point_list:

pose_id, pose_x, pose_y, = pose['id'][0], pose['x'][0], pose['y'][0]
kps[pose_id] = (int(pose_x), int(pose_y))
cv2.circle(frame,(int(pose_x),int(pose_y)), 3 ,color_list[color_flag], -1)
cv2.circle(frame,(int(pose_x),int(pose_y)), 3 ,color_list[color_flag], -1)
draw_limb(frame, kps, color_list[color_flag])
cv2.rectangle(frame, (int(xmin),int(ymin)), (int(xmax),int(ymax)), color_list[color_flag], 3)
cv2.putText(frame, 'id:' + str(track_id), (int(xmin),int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 1, color_list[color_flag], 2)
videoWriter.write(frame)
cv2.imwrite(frame_store_path, frame)
#videoWriter.release()
pbar.update(1)
pbar.close()


if __name__ == '__main__':
    # Entry point: render pose/tracking results onto frames and write them out.
    print('Visualizing the results')
    # NOTE(review): the three '${PGPT_ROOT}' assignments below are immediately
    # overridden by the cfg-based values further down — they look like diff residue.
    image_dir = '${PGPT_ROOT}/data/demodataset/'
    result_dir = '${PGPT_ROOT}/results/demo/'
    save_dir = '${PGPT_ROOT}/results/render/'
    #parser.add_argument("--image_dir", type=str, default=cfg.INPUT.DATA_FOLDER) # /PGPT/data/demodataset
    #parser.add_argument("--result_dir", type=str, default=cfg.OUTPUT.SAVE_DIR) # /PGPT/results/demo
    #parser.add_argument("--save_dir", type=str, default=cfg.OUTPUT.VIDEO_PATH) # /PGPT/results/demo

    # Parse --cfg/opts from the CLI and merge them into the global yacs config.
    args = parseArgs()
    update_config(cfg, args)

    image_dir = cfg.INPUT.DATA_FOLDER   # root of the dataset images
    result_dir = cfg.OUTPUT.SAVE_DIR    # directory holding per-video result JSONs
    save_dir = cfg.OUTPUT.VIDEO_PATH    # where rendered output is written

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    demo(image_dir, result_dir, save_dir)
Expand Down
2 changes: 1 addition & 1 deletion inference/track_and_detect_new.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from tracker import SiamFCTracker

from match import Matcher
from model.nms.nms_wrapper import nms
#from model.nms.nms_wrapper import nms

class Track_And_Detect(object):
effective_track_thresh = 0.5
Expand Down
9 changes: 9 additions & 0 deletions lib/config/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The code is based on HigherHRNet-Human-Pose-Estimation.
# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation)
# ------------------------------------------------------------------------------

from .default import _C as cfg
from .default import update_config
53 changes: 53 additions & 0 deletions lib/config/default.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
import os
from yacs.config import CfgNode as CN


# Default yacs configuration tree for PGPT inference.
_C = CN()

# GPU to run on. NOTE(review): original (French) comment said this does not
# work for other values (1, 2, 3) — confirm before using a non-zero id.
_C.GPU_ID = 0
_C.SYSTEM = CN()
# Number of GPUS to use in the experiment
_C.SYSTEM.NUM_GPUS = 4
# Number of workers for doing things
_C.SYSTEM.NUM_WORKERS = 4

# Project root inside the container/host layout.
_C.SYSTEM.PGPT_ROOT = '/PGPT'
# pose config file location
_C.SYSTEM.POSE_CONFIG = '/PGPT/cfgs/pose_res152.yaml'

_C.INPUT = CN()
# json from detection and pose, images directory
_C.INPUT.JSON_DETECTION_PATH = '/PGPT/results/demo_detection.json'
# gt_json_path is the ground truth of the validation; all the ground truth is in one file
_C.INPUT.GT_JSON_PATH = '/PGPT/data/demo_val.json'
# the data folder of the PoseTrack dataset
_C.INPUT.DATA_FOLDER = '/PGPT/data/demodataset'
# the path of the track model
_C.INPUT.TRACK_MODEL = '/PGPT/models/tracker.pth'
# the path of the pose estimation model
_C.INPUT.POSE_MODEL = '/PGPT/models/pose_gcn.pth.tar'
# the path of the re-identification embedding model
_C.INPUT.EMBEDDING_MODEL = '/PGPT/models/embedding_model.pth'

_C.OUTPUT = CN()
# where we store the results
_C.OUTPUT.SAVE_DIR = '/PGPT/results/demo'
# the path of the location where we store the video
_C.OUTPUT.VIDEO_PATH = '/PGPT/results/demo/demo-pgpt.mp4'

def get_cfg_defaults():
    """Return a fresh copy of the default configuration for this project.

    A clone is handed back — never ``_C`` itself — so callers may mutate
    their copy without altering the module-level defaults (the
    "local variable" use pattern).
    """
    defaults = _C.clone()
    return defaults

def update_config(cfg, args):
    """Merge a YAML file and CLI overrides into *cfg* in place.

    Args:
        cfg: a yacs CfgNode (typically the module-level defaults).
        args: parsed argparse namespace providing ``cfg`` (path to a YAML
            config file) and ``opts`` (flat key/value override list).

    The node is unfrozen for the merge and re-frozen afterwards so that
    later accidental mutation raises.
    """
    cfg.defrost()
    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)
    cfg.freeze()


if __name__ == '__main__':
    # Utility mode: dump the default config tree to the file named by the
    # first CLI argument (handy for generating a starter experiment YAML).
    import sys
    with open(sys.argv[1], 'w') as f:
        print(_C, file=f)
7 changes: 4 additions & 3 deletions lib/poseval/py/eval_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,8 +227,9 @@ def process_arguments(argv):
elif len(argv)<3 or len(argv)>4:
help()

gt_file = argv[1]
pred_file = argv[2]
gt_dir = argv[1]
pred_dir = argv[2]
return gt_dir, pred_dir, mode

if not os.path.exists(gt_file):
help('Given ground truth directory does not exist!\n')
Expand Down Expand Up @@ -282,7 +283,6 @@ def load_data(argv):

return gtFramesAll, prFramesAll


def cleanupData(gtFramesAll,prFramesAll):

# remove all GT frames with empty annorects and remove corresponding entries from predictions
Expand Down Expand Up @@ -377,6 +377,7 @@ def load_data_dir(argv):
if not os.path.exists(pred_dir):
help('Given prediction directory ' + pred_dir + ' does not exist!\n')
filenames = glob.glob(gt_dir + "/*.json")

gtFramesAll = []
prFramesAll = []

Expand Down
Loading